In [1]:
reload("nnadl-julia/neuralnet.jl")

In [2]:
n = NeuralNet.Network([2;3;2])


Out[2]:
Network([2,3,2],[
3x2 Array{Float64,2}:
 0.207276   1.52676  
 0.849024  -0.159867 
 1.46481    0.0767592,

2x3 Array{Float64,2}:
 -0.714715   0.815781   0.204955
  0.871498  -0.512168  -1.30869 ],[[-0.14313945510741935,1.9696980675544669,0.7109137232695631],[-1.0534671729715952,0.08020436033870665]])

In [3]:
length(n.layers)


Out[3]:
3

Verify backpropagation

Manually verify the cost derivatives returned by NeuralNet.backpropagate against a central finite-difference approximation.
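
For each bias and weight (written generically as a parameter θ below; this notation is mine, not from the notebook), the check compares the backpropagated derivative with the symmetric difference quotient

$$\frac{\partial C}{\partial \theta} \approx \frac{C(\theta + \delta) - C(\theta - \delta)}{2\delta}, \qquad \delta = 10^{-5},$$

which is exactly what the test_bias and test_weights helpers defined below compute.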


In [4]:
test_input = [1 1; 2 2]
test_output = [1 1; 2 2]


Out[4]:
2x2 Array{Int64,2}:
 1  1
 2  2

Test bias and weight derivatives


In [5]:
delta = 1e-5
function test_bias(network, layer, neuron, delta = 1e-5)
    # Central finite-difference approximation of dC/db for a single bias:
    # perturb the bias up and down by delta and take the symmetric difference quotient.
    nb_upper = deepcopy(network)
    nb_lower = deepcopy(network)
    nb_upper.bias[layer][neuron] += delta
    nb_lower.bias[layer][neuron] -= delta
    cost_upper = NeuralNet.cost(nb_upper, test_input, test_output)
    cost_lower = NeuralNet.cost(nb_lower, test_input, test_output)
    (cost_upper - cost_lower)/(2*delta)
end


Out[5]:
test_bias (generic function with 2 methods)

In [6]:
function test_weights(network, layer, neuronA, neuronB, delta = 1e-5)
    # Central finite-difference approximation of dC/dw for a single weight.
    # weights[layer] is (neurons in layer+1) x (neurons in layer), so neuronA
    # is the destination neuron (row) and neuronB the source neuron (column).
    nw_upper = deepcopy(network)
    nw_lower = deepcopy(network)
    nw_upper.weights[layer][neuronA, neuronB] += delta
    nw_lower.weights[layer][neuronA, neuronB] -= delta
    costw_upper = NeuralNet.cost(nw_upper, test_input, test_output)
    costw_lower = NeuralNet.cost(nw_lower, test_input, test_output)
    (costw_upper - costw_lower)/(2*delta)
end


Out[6]:
test_weights (generic function with 2 methods)
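
Before sweeping every parameter in the next cell, a single spot check makes the indexing convention explicit: the first index into weights[layer] is the destination neuron in layer+1 (row), the second the source neuron in layer (column). The snippet below is a minimal sketch of such a spot check; the destructuring of backpropagate's return value into bias and weight gradients mirrors how testDerive[1] and testDerive[2] are used in the next cell, and the names grad_bias and grad_weights are mine.

# Spot check: compare one numerical derivative with the backpropagated one.
grad_bias, grad_weights = NeuralNet.backpropagate(n, test_input, test_output)

# Bias of hidden neuron 1 (layer index 1 counts from the first non-input layer).
println(test_bias(n, 1, 1), " vs ", grad_bias[1][1])

# Weight from input neuron 2 into hidden neuron 3: row = destination, column = source.
println(test_weights(n, 1, 3, 2), " vs ", grad_weights[1][3, 2])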

In [7]:
testDerive = NeuralNet.backpropagate(n, test_input, test_output)
for (layer, numNeurons) in enumerate(n.layers[2:end])
    @printf("Layer %d\n", layer)
    for neuron in 1:numNeurons
        @printf("Bias Neuron %d\n", neuron)
        a = test_bias(n, layer, neuron)
        b = testDerive[1][layer][neuron]
        @Test.test_approx_eq_eps a b 1e-6
    end
    @printf("Weights\n")
    # neuronB indexes the destination layer (rows), neuronA the source layer (columns)
    for neuronA in 1:n.layers[layer], neuronB in 1:n.layers[layer+1]
        a = test_weights(n, layer, neuronB, neuronA)
        b = testDerive[2][layer][neuronB, neuronA]
        @Test.test_approx_eq_eps a b 1e-6
    end
end


Layer 1
Bias Neuron 1
Bias Neuron 2
Bias Neuron 3
Weights
Layer 2
Bias Neuron 1
Bias Neuron 2
Weights
